XEN_TARGET_ARCH ?= $(XEN_COMPILE_ARCH)
XEN_OS ?= $(shell uname -s)
-ifeq ($(XEN_TARGET_ARCH),x86_32)
-XEN_TARGET_X86_PAE ?= y
-endif
-
CONFIG_$(XEN_OS) := y
SHELL ?= /bin/sh
export XEN_TARGET_ARCH SUBARCH XEN_SYSTYPE
include buildconfigs/Rules.mk
-ifeq ($(XEN_TARGET_X86_PAE),y)
-export pae=y
-endif
-
# build and install everything into the standard system directories
.PHONY: install
install: install-xen install-kernels install-tools install-docs
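# For example (illustrative invocation only, assuming an already-prepared tree),
# a 32-bit build-and-install could be driven as:  make XEN_TARGET_ARCH=x86_32 install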
sh $(XEN_LINUX_CONFIG_UPDATE) $(CONFIG_FILE)
endif
ifeq ($(XEN_TARGET_ARCH),x86_32)
-ifeq ($(pae),y)
sed -e 's!^CONFIG_HIGHMEM4G=y$$!\# CONFIG_HIGHMEM4G is not set!;s!^\# CONFIG_HIGHMEM64G is not set$$!CONFIG_HIGHMEM64G=y!' $(CONFIG_FILE) > $(CONFIG_FILE)- && mv $(CONFIG_FILE)- $(CONFIG_FILE)
-else
- grep '^CONFIG_HIGHMEM64G=y' $(CONFIG_FILE) >/dev/null && ( sed -e 's!^CONFIG_HIGHMEM64G=y$$!\# CONFIG_HIGHMEM64G is not set!;s!^\# CONFIG_HIGHMEM4G is not set$$!CONFIG_HIGHMEM4G=y!' $(CONFIG_FILE) > $(CONFIG_FILE)- && mv $(CONFIG_FILE)- $(CONFIG_FILE) ) || true
-endif
endif
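# With the pae= knob gone, x86_32 kernel configs are now always switched from
# CONFIG_HIGHMEM4G to CONFIG_HIGHMEM64G (the PAE highmem mode); the reverse
# edit that the removed non-PAE branch performed is no longer needed.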
ifneq ($(EXTRAVERSION),)
echo "$(EXTRAVERSION)" >$(LINUX_DIR)/localversion-xen
-# This tree only supports PAE
XEN_TARGET_ARCH = x86_32
-XEN_TARGET_X86_PAE = y
EXTRAVERSION = -xen
LINUX_VER = 2.6.5-SLES
-# This tree only supports PAE
XEN_TARGET_ARCH = x86_32
-XEN_TARGET_X86_PAE = y
EXTRAVERSION = -xen
LINUX_VER = 2.6.9-RHEL
MINI-OS_ROOT=$(XEN_ROOT)/extras/mini-os
export MINI-OS_ROOT
-ifeq ($(XEN_TARGET_ARCH),x86_32)
-export pae ?= y
-endif
libc = $(stubdom)
XEN_INTERFACE_VERSION := 0x00030205
# Export these variables for possible use in architecture dependent makefiles.
export TARGET_ARCH_DIR
export TARGET_ARCH_FAM
-export XEN_TARGET_X86_PAE
# This is used for architecture specific links.
# This can be overwritten from arch specific rules.
ifneq ($(CAMLDIR),)
caml=y
endif
-
-ifeq ($(pae),y)
-DEF_CPPFLAGS += -DCONFIG_X86_PAE
-endif
ARCH_ASFLAGS := -m32
EXTRA_INC += $(TARGET_ARCH_FAM)/$(XEN_TARGET_ARCH)
EXTRA_SRC += arch/$(EXTRA_INC)
-
-ifeq ($(XEN_TARGET_X86_PAE),y)
-ARCH_CFLAGS += -DCONFIG_X86_PAE=1
-ARCH_ASFLAGS += -DCONFIG_X86_PAE=1
-endif
endif
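# Note: CONFIG_X86_PAE is no longer injected via CPPFLAGS/CFLAGS/ASFLAGS here;
# with the non-PAE target removed, 32-bit builds assume PAE unconditionally.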
ifeq ($(XEN_TARGET_ARCH),x86_64)
prot_t = L2_PROT;
pincmd = MMUEXT_PIN_L1_TABLE;
break;
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
case L2_FRAME:
prot_e = L2_PROT;
prot_t = L3_PROT;
pincmd = MMUEXT_PIN_L2_TABLE;
break;
-#endif
#if defined(__x86_64__)
case L3_FRAME:
prot_e = L3_PROT;
/* Update the entry */
#if defined(__x86_64__)
tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
- tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
#endif
-#if defined(CONFIG_X86_PAE)
tab = pte_to_virt(tab[l3_table_offset(pt_page)]);
-#endif
mmu_updates[0].ptr = ((pgentry_t)tab[l2_table_offset(pt_page)] & PAGE_MASK) +
sizeof(pgentry_t) * l1_table_offset(pt_page);
} else
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
if(level == L2_FRAME)
{
#if defined(__x86_64__)
return 1;
} else
-#endif /* defined(__x86_64__) || defined(CONFIG_X86_PAE) */
/* Always need l1 frames */
if(level == L1_FRAME)
mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
offset = l3_table_offset(start_address);
/* Need new L2 pt frame */
if(!(start_address & L2_MASK))
page = tab[offset];
mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
offset = l2_table_offset(start_address);
/* Need new L1 pt frame */
if(!(start_address & L1_MASK))
mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
offset = l3_table_offset(start_address);
page = tab[offset];
mfn = pte_to_mfn(page);
tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
-#endif
offset = l2_table_offset(start_address);
page = tab[offset];
mfn = pte_to_mfn(page);
mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(mfn);
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
offset = l3_table_offset(addr);
if (!(tab[offset] & _PAGE_PRESENT))
return NULL;
mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(mfn);
-#endif
offset = l2_table_offset(addr);
if (!(tab[offset] & _PAGE_PRESENT))
return NULL;
mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(mfn);
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
offset = l3_table_offset(addr);
if (!(tab[offset] & _PAGE_PRESENT)) {
pt_pfn = virt_to_pfn(alloc_page());
ASSERT(tab[offset] & _PAGE_PRESENT);
mfn = pte_to_mfn(tab[offset]);
tab = mfn_to_virt(mfn);
-#endif
offset = l2_table_offset(addr);
if (!(tab[offset] & _PAGE_PRESENT)) {
pt_pfn = virt_to_pfn(alloc_page());
void hypervisor_callback(void);
void failsafe_callback(void);
-#if !defined(CONFIG_X86_PAE)
+#if defined(__x86_64__)
#define __pte(x) ((pte_t) { (x) } )
#else
#define __pte(x) ({ unsigned long long _x = (x); \
tab = pte_to_virt(page);
printk(" L4 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l4_table_offset(addr));
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
page = tab[l3_table_offset(addr)];
tab = pte_to_virt(page);
printk(" L3 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l3_table_offset(addr));
-#endif
page = tab[l2_table_offset(addr)];
tab = pte_to_virt(page);
printk(" L2 = %"PRIpte" (%p) [offset = %lx]\n", page, tab, l2_table_offset(addr));
return 0;
tab = pte_to_virt(page);
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
page = tab[l3_table_offset(addr)];
if (!(page & _PAGE_PRESENT))
return 0;
tab = pte_to_virt(page);
-#endif
+
page = tab[l2_table_offset(addr)];
if (!(page & _PAGE_PRESENT))
return 0;
.ascii ",VIRT_BASE=0x0" /* &_text from minios_x86_32.lds */
.ascii ",ELF_PADDR_OFFSET=0x0"
.ascii ",HYPERCALL_PAGE=0x2"
-#ifdef CONFIG_X86_PAE
.ascii ",PAE=yes"
-#else
- .ascii ",PAE=no"
-#endif
.ascii ",LOADER=generic"
.byte 0
.text
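/*
 * The __xen_guest note above now always advertises PAE=yes, so the domain
 * builder will construct 3-level (PAE) pagetables for this 32-bit image;
 * there is no non-PAE variant any more.
 */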
typedef long long quad_t;
typedef unsigned long long u_quad_t;
-#if !defined(CONFIG_X86_PAE)
-typedef struct { unsigned long pte_low; } pte_t;
-#else
typedef struct { unsigned long pte_low, pte_high; } pte_t;
-#endif /* CONFIG_X86_PAE */
#elif defined(__x86_64__) || defined(__ia64__)
typedef long quad_t;
typedef struct { unsigned long pte; } pte_t;
#endif /* __i386__ || __x86_64__ */
-#if !defined(CONFIG_X86_PAE)
+#ifdef __x86_64__
#define __pte(x) ((pte_t) { (x) } )
#else
#define __pte(x) ({ unsigned long long _x = (x); \
#ifdef __ASSEMBLY__
#define __PAGE_SIZE (1 << __PAGE_SHIFT)
#else
-#ifndef CONFIG_X86_PAE
+#ifdef __x86_64__
#define __PAGE_SIZE (1UL << __PAGE_SHIFT)
#else
#define __PAGE_SIZE (1ULL << __PAGE_SHIFT)
#if defined(__i386__)
-#if !defined(CONFIG_X86_PAE)
-
-#define L2_PAGETABLE_SHIFT 22
-
-#define L1_PAGETABLE_ENTRIES 1024
-#define L2_PAGETABLE_ENTRIES 1024
-
-#define PADDR_BITS 32
-#define PADDR_MASK (~0UL)
-
-#define NOT_L1_FRAMES 1
-#define PRIpte "08lx"
-#ifndef __ASSEMBLY__
-typedef unsigned long pgentry_t;
-#endif
-
-#else /* defined(CONFIG_X86_PAE) */
-
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
typedef uint64_t pgentry_t;
#endif
-#endif /* !defined(CONFIG_X86_PAE) */
-
#elif defined(__x86_64__)
#define L2_PAGETABLE_SHIFT 21
(((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(_a) \
(((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
#define l3_table_offset(_a) \
(((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
-#endif
#if defined(__x86_64__)
#define l4_table_offset(_a) \
(((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY |_PAGE_USER)
-#if defined(CONFIG_X86_PAE)
#define L3_PROT (_PAGE_PRESENT)
-#endif /* CONFIG_X86_PAE */
#elif defined(__x86_64__)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L1_PROT_RO (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_USER)
#ifndef __ASSEMBLY__
/* Definitions for machine and pseudophysical addresses. */
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
typedef unsigned long long paddr_t;
typedef unsigned long long maddr_t;
#else
#define virtual_to_l3(_virt) PT_BASE
#endif
-#if defined(__x86_64__) || defined(CONFIG_X86_PAE)
#define virtual_to_l2(_virt) ({ \
unsigned long __virt2 = (_virt); \
(pgentry_t *) pte_to_virt(virtual_to_l3(__virt2)[l3_table_offset(__virt2)]); \
})
-#else
-#define virtual_to_l2(_virt) PT_BASE
-#endif
#define virtual_to_l1(_virt) ({ \
unsigned long __virt1 = (_virt); \
HYPERVISOR_update_va_mapping(
unsigned long va, pte_t new_val, unsigned long flags)
{
- unsigned long pte_hi = 0;
-#ifdef CONFIG_X86_PAE
- pte_hi = new_val.pte_high;
-#endif
return _hypercall4(int, update_va_mapping, va,
- new_val.pte_low, pte_hi, flags);
+ new_val.pte_low, new_val.pte_high, flags);
}
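/*
 * With non-PAE x86_32 support removed, pte_t always carries a pte_high word,
 * so it is passed to the hypercall directly rather than through a
 * CONFIG_X86_PAE-guarded temporary.
 */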
static inline int
HYPERVISOR_update_va_mapping_otherdomain(
unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
{
- unsigned long pte_hi = 0;
-#ifdef CONFIG_X86_PAE
- pte_hi = new_val.pte_high;
-#endif
return _hypercall5(int, update_va_mapping_otherdomain, va,
- new_val.pte_low, pte_hi, flags, domid);
+ new_val.pte_low, new_val.pte_high, flags, domid);
}
static inline int
# If you change any of these configuration options then you must
# 'make clean' before rebuilding.
#
-pae ?= n
supervisor_mode_kernel ?= n
# Solaris grabs stdarg.h and friends from the system include directory.
CFLAGS += $(call cc-option,$(CC),-fno-stack-protector,)
CFLAGS += $(call cc-option,$(CC),-fno-stack-protector-all,)
-ifeq ($(TARGET_SUBARCH)$(pae),x86_32y)
-CFLAGS += -DCONFIG_X86_PAE=1
-endif
-
ifeq ($(supervisor_mode_kernel),y)
CFLAGS += -DCONFIG_X86_SUPERVISOR_MODE_KERNEL=1
endif
mov %eax,sym_phys(idle_pg_table) + (262*8) /* PML4[262]: 1:1 map */
mov $(sym_phys(l3_xenmap)+7),%eax
mov %eax,sym_phys(idle_pg_table) + (261*8) /* PML4[261]: xen map */
-#elif defined(CONFIG_X86_PAE)
+#else
/* Initialize low and high mappings of memory with 2MB pages */
mov $sym_phys(idle_pg_table_l2),%edi
mov $0xe3,%eax /* PRESENT+RW+A+D+2MB */
add $(1<<L2_PAGETABLE_SHIFT),%eax
cmp $(16<<20)+0xe3,%eax
jne 1b
-#else
- /* Initialize low and high mappings of memory with 4MB pages */
- mov $sym_phys(idle_pg_table),%edi
- mov $0xe3,%eax /* PRESENT+RW+A+D+4MB */
-1: mov %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
- stosl /* low mapping */
- add $(1<<L2_PAGETABLE_SHIFT),%eax
- cmp $DIRECTMAP_PHYS_END+0xe3,%eax
- jne 1b
-1: stosl /* low mappings cover up to 16MB */
- add $(1<<L2_PAGETABLE_SHIFT),%eax
- cmp $(16<<20)+0xe3,%eax
- jne 1b
#endif
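        /* In the PAE init loop above, 0xe3 is the flag set
         * _PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_PSE
         * (0x1|0x2|0x20|0x40|0x80), marking each 2MB superpage mapping. */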
/* Initialize 4kB mappings of first 2MB or 4MB of memory. */
#endif
xor %ecx,%ecx
1: stosl
-#if CONFIG_PAGING_LEVELS >= 3
add $4,%edi
-#endif
add $PAGE_SIZE,%eax
inc %ecx
/* VGA hole (0xa0000-0xc0000) should be mapped UC. */
#if defined(__x86_64__)
mov %edi,sym_phys(l2_identmap)
mov %edi,sym_phys(l2_xenmap)
-#elif defined(CONFIG_X86_PAE)
+#else
mov %edi,sym_phys(idle_pg_table_l2)
mov %edi,sym_phys(idle_pg_table_l2) + (__PAGE_OFFSET>>18)
-#else
- mov %edi,sym_phys(idle_pg_table)
- mov %edi,sym_phys(idle_pg_table) + (__PAGE_OFFSET>>20)
#endif
/* Copy bootstrap trampoline to low memory, below 1MB. */
fninit
/* Initialise CR4. */
-#if CONFIG_PAGING_LEVELS == 2
- mov $X86_CR4_PSE,%ecx
-#else
mov $X86_CR4_PAE,%ecx
-#endif
mov %ecx,%cr4
/* Load pagetable base register. */
add bootsym_phys(trampoline_xen_phys_start),%eax
mov %eax,%cr3
-#if CONFIG_PAGING_LEVELS != 2
/* Set up EFER (Extended Feature Enable Register). */
mov bootsym_phys(cpuid_ext_features),%edi
test $0x20100800,%edi /* SYSCALL/SYSRET, No Execute, Long Mode? */
btsl $_EFER_NX,%eax /* No Execute */
1: wrmsr
.Lskip_efer:
-#endif
mov $0x80050033,%eax /* hi-to-lo: PG,AM,WP,NE,ET,MP,PE */
mov %eax,%cr0
/* fpu init? */
/* Initialise CR4. */
-#if CONFIG_PAGING_LEVELS == 2
- mov $X86_CR4_PSE, %ecx
-#else
mov $X86_CR4_PAE, %ecx
-#endif
mov %ecx, %cr4
/* Load pagetable base register */
mov %eax,%cr3
/* Will cpuid feature change after resume? */
-#if CONFIG_PAGING_LEVELS != 2
/* Set up EFER (Extended Feature Enable Register). */
mov bootsym_phys(cpuid_ext_features),%edi
test $0x20100800,%edi /* SYSCALL/SYSRET, No Execute, Long Mode? */
btsl $_EFER_NX,%eax /* No Execute */
1: wrmsr
.Lskip_eferw:
-#endif
wbinvd
.long gdt_table - FIRST_RESERVED_GDT_BYTE
-#ifdef CONFIG_X86_PAE
.align 32
ENTRY(idle_pg_table)
.long sym_phys(idle_pg_table_l2) + 0*PAGE_SIZE + 0x01, 0
.long sym_phys(idle_pg_table_l2) + 1*PAGE_SIZE + 0x01, 0
.long sym_phys(idle_pg_table_l2) + 2*PAGE_SIZE + 0x01, 0
.long sym_phys(idle_pg_table_l2) + 3*PAGE_SIZE + 0x01, 0
-#endif
.align PAGE_SIZE, 0
/* NB. Rings != 0 get access up to MACH2PHYS_VIRT_END. This allows access to */
v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
if ( (v_end - vstack_end) < (512UL << 10) )
v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
-#if defined(__i386__) && !defined(CONFIG_X86_PAE)
- if ( (((v_end - v_start + ((1UL<<L2_PAGETABLE_SHIFT)-1)) >>
- L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
- break;
-#elif defined(__i386__) && defined(CONFIG_X86_PAE)
+#if defined(__i386__)
/* 5 pages: 1x 3rd + 4x 2nd level */
if ( (((v_end - v_start + ((1UL<<L2_PAGETABLE_SHIFT)-1)) >>
L2_PAGETABLE_SHIFT) + 5) <= nr_pt_pages )
if ( paging_mode_hap(d) )
{
__vmwrite(EPT_POINTER, d->arch.hvm_domain.vmx.ept_control.eptp);
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
__vmwrite(EPT_POINTER_HIGH,
d->arch.hvm_domain.vmx.ept_control.eptp >> 32);
#endif
__vmwrite(GUEST_PDPTR1, guest_pdptrs[1]);
__vmwrite(GUEST_PDPTR2, guest_pdptrs[2]);
__vmwrite(GUEST_PDPTR3, guest_pdptrs[3]);
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
__vmwrite(GUEST_PDPTR0_HIGH, guest_pdptrs[0] >> 32);
__vmwrite(GUEST_PDPTR1_HIGH, guest_pdptrs[1] >> 32);
__vmwrite(GUEST_PDPTR2_HIGH, guest_pdptrs[2] >> 32);
case EXIT_REASON_EPT_VIOLATION:
{
paddr_t gpa = __vmread(GUEST_PHYSICAL_ADDRESS);
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
gpa |= (paddr_t)__vmread(GUEST_PHYSICAL_ADDRESS_HIGH) << 32;
#endif
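        /*
         * On a 32-bit (__i386__) build, 64-bit VMCS fields such as
         * GUEST_PHYSICAL_ADDRESS are accessed as two 32-bit halves, hence the
         * extra *_HIGH reads/writes guarded by __i386__ in these hunks; a
         * 64-bit build transfers the whole value in a single access.
         */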
exit_qualification = __vmread(EXIT_QUALIFICATION);
* 1. Debug builds get extra checking by using CMPXCHG[8B].
* 2. PAE builds perform an atomic 8-byte store with CMPXCHG8B.
*/
-#if !defined(NDEBUG) || defined(CONFIG_X86_PAE)
+#if !defined(NDEBUG) || defined(__i386__)
#define PTE_UPDATE_WITH_CMPXCHG
#endif
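/*
 * Illustrative sketch only (not part of this change): the kind of CMPXCHG8B
 * loop the comment above refers to, written with the GCC __sync builtin.
 * The name pte_store_atomic is hypothetical; Xen's real code uses its own
 * cmpxchg()/UPDATE_ENTRY machinery under PTE_UPDATE_WITH_CMPXCHG.
 */
static inline void pte_store_atomic(volatile unsigned long long *ptep,
                                    unsigned long long newval)
{
    unsigned long long oldval = *ptep, seen;

    /* Retry until the compare-and-exchange sees the value we started from;
     * a 64-bit __sync CAS on i686 is emitted as LOCK CMPXCHG8B. */
    while ( (seen = __sync_val_compare_and_swap(ptep, oldval, newval)) != oldval )
        oldval = seen;
}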
share_xen_page_with_guest(page, dom_xen, readonly);
}
-#if defined(CONFIG_X86_PAE)
+#if defined(__i386__)
#ifdef NDEBUG
/* Only PDPTs above 4GB boundary need to be shadowed in low memory. */
spin_unlock(&cache->lock);
}
-#else /* !CONFIG_X86_PAE */
+#else /* !defined(__i386__) */
void make_cr3(struct vcpu *v, unsigned long mfn)
{
v->arch.cr3 = mfn << PAGE_SHIFT;
}
-#endif /* !CONFIG_X86_PAE */
+#endif /* !defined(__i386__) */
void write_ptbase(struct vcpu *v)
{
return 0;
}
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
{
struct page_info *page;
return 1;
}
-#else
-# define create_pae_xen_mappings(d, pl3e) (1)
-#endif
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
/* Flush a pgdir update into low-memory caches. */
static void pae_flush_pgd(
unsigned long mfn, unsigned int idx, l3_pgentry_t nl3e)
adjust_guest_l2e(pl2e[i], d);
}
-#if CONFIG_PAGING_LEVELS == 2
- /* Xen private mappings. */
- memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
- pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- l2e_from_page(
- virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
- __PAGE_HYPERVISOR);
-#endif
-
unmap_domain_page(pl2e);
return 1;
l3_pgentry_t *pl3e;
int i;
-#ifdef CONFIG_X86_PAE
+#if CONFIG_PAGING_LEVELS == 3
/*
* PAE pgdirs above 4GB are unacceptable if the guest does not understand
* the weird 'extended cr3' format for dealing with high-order address
for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
{
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
if ( is_pv_32bit_domain(d) && (i == 3) )
{
if ( !(l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ||
d) )
goto fail;
}
- else
-#endif
- if ( is_guest_l3_slot(i) &&
- unlikely(!get_page_from_l3e(pl3e[i], pfn, d)) )
+ else if ( is_guest_l3_slot(i) &&
+ unlikely(!get_page_from_l3e(pl3e[i], pfn, d)) )
goto fail;
-
+
adjust_guest_l3e(pl3e[i], d);
}
return 0;
}
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
/*
* Disallow updates to final L3 slot. It contains Xen mappings, and it
* would be a pain to ensure they remain continuously valid throughout.
*/
if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
return 0;
-#endif
if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
return 0;
}
#endif
-#if CONFIG_PAGING_LEVELS == 2
-static void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t l2mfn)
-{
- struct domain *d = v->domain;
- l2_pgentry_t *l2e;
- int i;
-
- l2e = hap_map_domain_page(l2mfn);
- ASSERT(l2e != NULL);
-
- /* Copy the common Xen mappings from the idle domain */
- memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- l2e_from_pfn(
- mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
- __PAGE_HYPERVISOR);
-
- /* Install the linear mapping */
- l2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_from_pfn(mfn_x(l2mfn), __PAGE_HYPERVISOR);
-
- /* Install the domain-specific P2M table */
- l2e[l2_table_offset(RO_MPT_VIRT_START)] =
- l2e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
- __PAGE_HYPERVISOR);
-
- hap_unmap_domain_page(l2e);
-}
-#endif
-
static mfn_t hap_make_monitor_table(struct vcpu *v)
{
struct domain *d = v->domain;
HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
return m3mfn;
}
-#else
- {
- mfn_t m2mfn;
- if ( (pg = hap_alloc(d)) == NULL )
- goto oom;
- m2mfn = page_to_mfn(pg);;
- hap_install_xen_entries_in_l2(v, m2mfn);
- return m2mfn;
- }
#endif
oom:
PGT_l4_page_table
#elif CONFIG_PAGING_LEVELS == 3
PGT_l3_page_table
-#elif CONFIG_PAGING_LEVELS == 2
- PGT_l2_page_table
#endif
| 1 | PGT_validated;
-ifneq ($(pae),n)
obj-$(x86_32) += common.o g2_on_s3.o g3_on_s3.o
-else
-obj-$(x86_32) += common.o g2_on_s2.o
-endif
-
obj-$(x86_64) += common.o g4_on_s4.o g3_on_s3.o g2_on_s3.o
guest_levels = $(subst g,,$(filter g%,$(subst ., ,$(subst _, ,$(1)))))
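# gN_on_sM.o pairs an N-level guest with an M-level shadow (guest_levels above
# extracts the gN part from the file name). With the non-PAE hypervisor gone,
# 2-level guests always run on 3-level shadows (g2_on_s3), and the g2_on_s2
# flavour is no longer built.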
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
#elif CONFIG_PAGING_LEVELS == 3
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-#elif CONFIG_PAGING_LEVELS == 2
- v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
#endif
}
if ( !(page->count_info & PGC_page_table) )
return 0; /* Not shadowed at all */
-#if CONFIG_PAGING_LEVELS == 2
- if ( page->shadow_flags & SHF_L1_32 )
- result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2, 2)
- (v, gmfn, entry, size);
-#else
if ( page->shadow_flags & SHF_L1_32 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 2)
(v, gmfn, entry, size);
-#endif
-#if CONFIG_PAGING_LEVELS == 2
- if ( page->shadow_flags & SHF_L2_32 )
- result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2, 2)
- (v, gmfn, entry, size);
-#else
if ( page->shadow_flags & SHF_L2_32 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3, 2)
(v, gmfn, entry, size);
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
if ( page->shadow_flags & SHF_L1_PAE )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3, 3)
(v, gmfn, entry, size);
if ( page->shadow_flags & SHF_L2H_PAE )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3, 3)
(v, gmfn, entry, size);
-#else /* 32-bit non-PAE hypervisor does not support PAE guests */
- ASSERT((page->shadow_flags & (SHF_L2H_PAE|SHF_L2_PAE|SHF_L1_PAE)) == 0);
-#endif
#if CONFIG_PAGING_LEVELS >= 4
if ( page->shadow_flags & SHF_L1_64 )
if ( page->shadow_flags & SHF_L4_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4, 4)
(v, gmfn, entry, size);
-#else /* 32-bit/PAE hypervisor does not support 64-bit guests */
+#else /* 32-bit hypervisor does not support 64-bit guests */
ASSERT((page->shadow_flags
& (SHF_L4_64|SHF_L3_64|SHF_L2H_64|SHF_L2_64|SHF_L1_64)) == 0);
#endif
static inline u32
shadow_order(unsigned int shadow_type)
{
-#if CONFIG_PAGING_LEVELS > 2
static const u32 type_to_order[SH_type_unused] = {
0, /* SH_type_none */
1, /* SH_type_l1_32_shadow */
};
ASSERT(shadow_type < SH_type_unused);
return type_to_order[shadow_type];
-#else /* 32-bit Xen only ever shadows 32-bit guests on 32-bit shadows. */
- return 0;
-#endif
}
static inline unsigned int
switch ( sp->type )
{
case SH_type_l2_32_shadow:
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,2,2)(v,smfn);
-#else
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings,3,2)(v,smfn);
-#endif
break;
-#if CONFIG_PAGING_LEVELS >= 3
case SH_type_l2_pae_shadow:
case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings,3,3)(v,smfn);
break;
-#endif
#if CONFIG_PAGING_LEVELS >= 4
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings,4,4)(v,smfn);
* small numbers that the compiler will enjoy */
switch ( t )
{
-#if CONFIG_PAGING_LEVELS == 2
- case SH_type_l1_32_shadow:
- case SH_type_fl1_32_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2, 2)(v, smfn);
- break;
- case SH_type_l2_32_shadow:
- SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 2, 2)(v, smfn);
- break;
-#else /* PAE or 64bit */
case SH_type_l1_32_shadow:
case SH_type_fl1_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 2)(v, smfn);
case SH_type_l2_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 2)(v, smfn);
break;
-#endif
-#if CONFIG_PAGING_LEVELS >= 3
case SH_type_l1_pae_shadow:
case SH_type_fl1_pae_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 3, 3)(v, smfn);
case SH_type_l2h_pae_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3, 3)(v, smfn);
break;
-#endif
#if CONFIG_PAGING_LEVELS >= 4
case SH_type_l1_64_shadow:
/* Dispatch table for getting per-type functions */
static hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,2,2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,2,2), /* fl1_32 */
-#else
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,2), /* fl1_32 */
-#endif
NULL, /* l2_32 */
-#if CONFIG_PAGING_LEVELS >= 3
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* l1_pae */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1,3,3), /* fl1_pae */
-#else
- NULL, /* l1_pae */
- NULL, /* fl1_pae */
-#endif
NULL, /* l2_pae */
NULL, /* l2h_pae */
#if CONFIG_PAGING_LEVELS >= 4
/* Dispatch table for getting per-type functions */
static hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,2,2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,2,2), /* fl1_32 */
-#else
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,2), /* fl1_32 */
-#endif
NULL, /* l2_32 */
-#if CONFIG_PAGING_LEVELS >= 3
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* l1_pae */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1,3,3), /* fl1_pae */
-#else
- NULL, /* l1_pae */
- NULL, /* fl1_pae */
-#endif
NULL, /* l2_pae */
NULL, /* l2h_pae */
#if CONFIG_PAGING_LEVELS >= 4
{
case SH_type_l1_32_shadow:
case SH_type_l2_32_shadow:
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,2,2)(v, vaddr, pmfn);
-#else
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,3,2)(v, vaddr, pmfn);
-#endif
break;
-#if CONFIG_PAGING_LEVELS >=3
case SH_type_l1_pae_shadow:
case SH_type_l2_pae_shadow:
case SH_type_l2h_pae_shadow:
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry,4,4)(v, vaddr, pmfn);
break;
-#endif
#endif
default: BUG(); /* Some weird unknown shadow type */
}
NULL, /* none */
NULL, /* l1_32 */
NULL, /* fl1_32 */
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,2,2), /* l2_32 */
-#else
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,2), /* l2_32 */
-#endif
NULL, /* l1_pae */
NULL, /* fl1_pae */
-#if CONFIG_PAGING_LEVELS >= 3
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2_pae */
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow,3,3), /* l2h_pae */
-#else
- NULL, /* l2_pae */
- NULL, /* l2h_pae */
-#endif
NULL, /* l1_64 */
NULL, /* fl1_64 */
#if CONFIG_PAGING_LEVELS >= 4
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,4,4);
#elif CONFIG_PAGING_LEVELS == 3
v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,3,3);
-#elif CONFIG_PAGING_LEVELS == 2
- v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode,2,2);
#else
#error unexpected paging mode
#endif
/* Dispatch table for getting per-type functions */
static hash_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
-#if CONFIG_PAGING_LEVELS == 2
- SHADOW_INTERNAL_NAME(sh_audit_l1_table,2,2), /* l1_32 */
- SHADOW_INTERNAL_NAME(sh_audit_fl1_table,2,2), /* fl1_32 */
- SHADOW_INTERNAL_NAME(sh_audit_l2_table,2,2), /* l2_32 */
-#else
SHADOW_INTERNAL_NAME(sh_audit_l1_table,3,2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table,3,2), /* fl1_32 */
SHADOW_INTERNAL_NAME(sh_audit_l2_table,3,2), /* l2_32 */
SHADOW_INTERNAL_NAME(sh_audit_l3_table,4,4), /* l3_64 */
SHADOW_INTERNAL_NAME(sh_audit_l4_table,4,4), /* l4_64 */
#endif /* CONFIG_PAGING_LEVELS >= 4 */
-#endif /* CONFIG_PAGING_LEVELS > 2 */
NULL /* All the rest */
};
unsigned int mask;
#endif
-#if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
-void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn)
-{
- struct domain *d = v->domain;
- shadow_l2e_t *sl2e;
- int i;
-
- sl2e = sh_map_domain_page(sl2mfn);
- ASSERT(sl2e != NULL);
- ASSERT(sizeof (l2_pgentry_t) == sizeof (shadow_l2e_t));
-
- /* Copy the common Xen mappings from the idle domain */
- memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
- L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
-
- /* Install the per-domain mappings for this domain */
- for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
- sl2e[shadow_l2_table_offset(PERDOMAIN_VIRT_START) + i] =
- shadow_l2e_from_mfn(
- page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i),
- __PAGE_HYPERVISOR);
-
- /* Linear mapping */
- sl2e[shadow_l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- shadow_l2e_from_mfn(sl2mfn, __PAGE_HYPERVISOR);
-
- if ( shadow_mode_translate(v->domain) && !shadow_mode_external(v->domain) )
- {
- // linear tables may not be used with translated PV guests
- sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START)] =
- shadow_l2e_empty();
- }
- else
- {
- sl2e[shadow_l2_table_offset(LINEAR_PT_VIRT_START)] =
- shadow_l2e_from_mfn(gl2mfn, __PAGE_HYPERVISOR);
- }
-
- if ( shadow_mode_translate(d) )
- {
- /* install domain-specific P2M table */
- sl2e[shadow_l2_table_offset(RO_MPT_VIRT_START)] =
- shadow_l2e_from_mfn(pagetable_get_mfn(d->arch.phys_table),
- __PAGE_HYPERVISOR);
- }
-
- sh_unmap_domain_page(sl2e);
-}
-#endif
#if CONFIG_PAGING_LEVELS >= 3 && GUEST_PAGING_LEVELS >= 3
case SH_type_l2h_shadow:
sh_install_xen_entries_in_l2h(v, smfn); break;
-#endif
-#if CONFIG_PAGING_LEVELS == 2 && GUEST_PAGING_LEVELS == 2
- case SH_type_l2_shadow:
- sh_install_xen_entries_in_l2(v, gmfn, smfn); break;
#endif
default: /* Do nothing */ break;
}
return m3mfn;
}
-#elif CONFIG_PAGING_LEVELS == 2
-
- {
- mfn_t m2mfn;
- m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
- sh_install_xen_entries_in_l2(v, m2mfn, m2mfn);
- /* Remember the level of this table */
- mfn_to_page(m2mfn)->shadow_flags = 2;
- return m2mfn;
- }
-
#else
#error this should not happen
#endif /* CONFIG_PAGING_LEVELS */
sh_unmap_domain_page(l2e);
}
-#elif CONFIG_PAGING_LEVELS == 2
-
- /* For PV, one l2e points at the guest l2, one points at the shadow
- * l2. No maintenance required.
- * For HVM, just need to update the l2e that points to the shadow l2. */
-
- if ( shadow_mode_external(d) )
- {
- /* Use the linear map if we can; otherwise make a new mapping */
- if ( v == current )
- {
- __linear_l2_table[l2_linear_offset(SH_LINEAR_PT_VIRT_START)] =
- l2e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR);
- }
- else
- {
- l2_pgentry_t *ml2e;
- ml2e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
- ml2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l2e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR);
- sh_unmap_domain_page(ml2e);
- }
- }
-
#else
#error this should not happen
#endif
#define SHADOW_INTERNAL_NAME(name, shadow_levels, guest_levels) \
SHADOW_INTERNAL_NAME_HIDDEN(name, shadow_levels, guest_levels)
-#if CONFIG_PAGING_LEVELS == 2
-#define GUEST_LEVELS 2
-#define SHADOW_LEVELS 2
-#include "multi.h"
-#undef GUEST_LEVELS
-#undef SHADOW_LEVELS
-#endif /* CONFIG_PAGING_LEVELS == 2 */
-
#if CONFIG_PAGING_LEVELS == 3
#define GUEST_LEVELS 2
#define SHADOW_LEVELS 3
/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
-void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
/* Update the shadows in response to a pagetable write from Xen */
int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
-#if CONFIG_PAGING_LEVELS > 2
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
-#else
-unsigned long mmu_cr4_features = X86_CR4_PSE;
-#endif
EXPORT_SYMBOL(mmu_cr4_features);
int acpi_disabled;
(*info)[0] = '\0';
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-
- snprintf(s, sizeof(s), "xen-%d.%d-x86_32 ", major, minor);
- safe_strcat(*info, s);
- if ( hvm_enabled )
- {
- snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
- safe_strcat(*info, s);
- }
-
-#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
+#if defined(CONFIG_X86_32)
snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
safe_strcat(*info, s);
#if CONFIG_PAGING_LEVELS >= 3
l3t = map_domain_page(mfn);
-#ifdef CONFIG_X86_PAE
+#if CONFIG_PAGING_LEVELS == 3
l3t += (cr3 & 0xFE0UL) >> 3;
#endif
l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
mfn = l3e_get_pfn(l3e);
unmap_domain_page(l3t);
-#ifdef CONFIG_X86_PAE
+#if CONFIG_PAGING_LEVELS == 3
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
return 0;
#else
#include <asm/fixmap.h>
#include <public/memory.h>
-#ifdef CONFIG_X86_PAE
l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
idle_pg_table_l2[4 * L2_PAGETABLE_ENTRIES];
-#else
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
- idle_pg_table_l2[L2_PAGETABLE_ENTRIES];
-#endif
extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES];
struct page_info *pg;
int i;
-#ifdef CONFIG_X86_PAE
- printk("PAE enabled, limit: %d GB\n", MACHPHYS_MBYTES);
-#else
- printk("PAE disabled.\n");
-#endif
-
if ( cpu_has_pge )
{
/* Suitable Xen mapping can be GLOBAL. */
static void print_xen_info(void)
{
char taint_str[TAINT_STRING_MAX_LEN];
- char debug = 'n', *arch = "x86_32";
+ char debug = 'n', *arch = "x86_32p";
#ifndef NDEBUG
debug = 'y';
#endif
-#ifdef CONFIG_X86_PAE
- arch = "x86_32p";
-#endif
-
printk("----[ Xen-%d.%d%s %s debug=%c %s ]----\n",
xen_major_version(), xen_minor_version(), xen_extra_version(),
arch, debug, print_tainted(taint_str));
void show_page_walk(unsigned long addr)
{
unsigned long pfn, mfn, cr3 = read_cr3();
-#ifdef CONFIG_X86_PAE
l3_pgentry_t l3e, *l3t;
-#endif
l2_pgentry_t l2e, *l2t;
l1_pgentry_t l1e, *l1t;
mfn = cr3 >> PAGE_SHIFT;
-#ifdef CONFIG_X86_PAE
l3t = map_domain_page(mfn);
l3t += (cr3 & 0xFE0UL) >> 3;
l3e = l3t[l3_table_offset(addr)];
unmap_domain_page(l3t);
if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
return;
-#endif
l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(addr)];
#if defined(__x86_64__)
# define CONFIG_PAGING_LEVELS 4
-#elif defined(CONFIG_X86_PAE)
-# define CONFIG_PAGING_LEVELS 3
#else
-# define CONFIG_PAGING_LEVELS 2
+# define CONFIG_PAGING_LEVELS 3
#endif
#define CONFIG_X86 1
#define asmlinkage __attribute__((regparm(0)))
/*
- * Memory layout (high to low): SIZE PAE-SIZE
- * ------ ------
+ * Memory layout (high to low): PAE-SIZE
+ * ------
* I/O remapping area ( 4MB)
* Direct-map (1:1) area [Xen code/data/heap] (12MB)
* Per-domain mappings (inc. 4MB map_domain_page cache) ( 8MB)
- * Shadow linear pagetable ( 4MB) ( 8MB)
- * Guest linear pagetable ( 4MB) ( 8MB)
- * Machine-to-physical translation table [writable] ( 4MB) (16MB)
- * Frame-info table (24MB) (96MB)
+ * Shadow linear pagetable ( 8MB)
+ * Guest linear pagetable ( 8MB)
+ * Machine-to-physical translation table [writable] (16MB)
+ * Frame-info table (96MB)
* * Start of guest inaccessible area
- * Machine-to-physical translation table [read-only] ( 4MB) (16MB)
+ * Machine-to-physical translation table [read-only] (16MB)
* * Start of guest unmodifiable area
*/
#define MAPCACHE_MBYTES 4
#define PERDOMAIN_MBYTES 8
-#ifdef CONFIG_X86_PAE
-# define LINEARPT_MBYTES 8
-# define MACHPHYS_MBYTES 16 /* 1 MB needed per 1 GB memory */
-# define FRAMETABLE_MBYTES (MACHPHYS_MBYTES * 6)
-#else
-# define LINEARPT_MBYTES 4
-# define MACHPHYS_MBYTES 4
-# define FRAMETABLE_MBYTES 24
-#endif
+#define LINEARPT_MBYTES 8
+#define MACHPHYS_MBYTES 16 /* 1 MB needed per 1 GB memory */
+#define FRAMETABLE_MBYTES (MACHPHYS_MBYTES * 6)
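+/* At 4 bytes of M2P entry per 4kB frame, 16MB of writable M2P covers 16GB of
+ * machine memory; the frame-info table is six times that, i.e. 24 bytes of
+ * frame-info per page frame. */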
#define IOREMAP_VIRT_END 0UL
#define IOREMAP_VIRT_START (IOREMAP_VIRT_END - (IOREMAP_MBYTES<<20))
/* Maximum linear address accessible via guest memory segments. */
#define GUEST_SEGMENT_MAX_ADDR RO_MPT_VIRT_END
-#ifdef CONFIG_X86_PAE
/* Hypervisor owns top 168MB of virtual address space. */
#define HYPERVISOR_VIRT_START mk_unsigned_long(0xF5800000)
-#else
-/* Hypervisor owns top 64MB of virtual address space. */
-#define HYPERVISOR_VIRT_START mk_unsigned_long(0xFC000000)
-#endif
#define L2_PAGETABLE_FIRST_XEN_SLOT \
(HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
#define L2_PAGETABLE_XEN_SLOTS \
(L2_PAGETABLE_LAST_XEN_SLOT - L2_PAGETABLE_FIRST_XEN_SLOT + 1)
-#ifdef CONFIG_X86_PAE
-# define PGT_base_page_table PGT_l3_page_table
-#else
-# define PGT_base_page_table PGT_l2_page_table
-#endif
+#define PGT_base_page_table PGT_l3_page_table
#define __HYPERVISOR_CS 0xe008
#define __HYPERVISOR_DS 0xe010
cpuid_input_t cpuids[MAX_CPUID_INPUT];
} __cacheline_aligned;
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
struct pae_l3_cache {
/*
* Two low-memory (<4GB) PAE L3 tables, used as fallback when the guest
spinlock_t lock;
};
#define pae_l3_cache_init(c) spin_lock_init(&(c)->lock)
-#else /* !CONFIG_X86_PAE */
+#else /* !defined(__i386__) */
struct pae_l3_cache { };
#define pae_l3_cache_init(c) ((void)0)
#endif
* from the end of virtual memory backwards.
*/
enum fixed_addresses {
-#ifdef CONFIG_X86_PAE
+#ifdef __i386__
FIX_PAE_HIGHMEM_0,
FIX_PAE_HIGHMEM_END = FIX_PAE_HIGHMEM_0 + NR_CPUS-1,
#endif
#endif
/* The order of the largest allocation unit we use for shadow pages */
-#if CONFIG_PAGING_LEVELS == 2
-#define SHADOW_MAX_ORDER 0 /* Only ever need 4k allocations */
-#else
#define SHADOW_MAX_ORDER 2 /* Need up to 16k allocs for 32-bit on PAE/64 */
-#endif
#define page_get_owner(_p) (unpickle_domptr((_p)->u.inuse._domain))
#define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
-#if CONFIG_PAGING_LEVELS >= 3
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
-#endif
#if CONFIG_PAGING_LEVELS >= 4
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
#ifndef __ASSEMBLY__
/* Page-table type. */
-#if CONFIG_PAGING_LEVELS == 2
-/* x86_32 default */
-typedef struct { u32 pfn; } pagetable_t;
-#elif CONFIG_PAGING_LEVELS == 3
+#if CONFIG_PAGING_LEVELS == 3
/* x86_32 PAE */
typedef struct { u32 pfn; } pagetable_t;
#elif CONFIG_PAGING_LEVELS == 4
#endif /* !defined(__ASSEMBLY__) */
/* High table entries are reserved by the hypervisor. */
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-#define DOMAIN_ENTRIES_PER_L2_PAGETABLE \
- (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
-#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
- (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
-#else
#define DOMAIN_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE 0
#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \
(L4_PAGETABLE_ENTRIES - GUEST_ENTRIES_PER_L4_PAGETABLE \
+ DOMAIN_ENTRIES_PER_L4_PAGETABLE)
-#endif
/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#if CONFIG_PAGING_LEVELS == 3
extern l2_pgentry_t idle_pg_table_l2[
ROOT_PAGETABLE_ENTRIES * L2_PAGETABLE_ENTRIES];
-#elif CONFIG_PAGING_LEVELS == 2
-#define idle_pg_table_l2 idle_pg_table
#elif CONFIG_PAGING_LEVELS == 4
extern l2_pgentry_t *compat_idle_pg_table_l2;
extern unsigned int m2p_compat_vstart;
#if defined(__i386__)
typedef signed long long s64;
typedef unsigned long long u64;
-#if defined(CONFIG_X86_PAE)
typedef u64 paddr_t;
#define INVALID_PADDR (~0ULL)
#define PRIpaddr "016llx"
-#else
-typedef unsigned long paddr_t;
-#define INVALID_PADDR (~0UL)
-#define PRIpaddr "08lx"
-#endif
#elif defined(__x86_64__)
typedef signed long s64;
typedef unsigned long u64;
+++ /dev/null
-#ifndef __X86_32_PAGE_2LEVEL_H__
-#define __X86_32_PAGE_2LEVEL_H__
-
-#define L1_PAGETABLE_SHIFT 12
-#define L2_PAGETABLE_SHIFT 22
-#define PAGE_SHIFT L1_PAGETABLE_SHIFT
-#define ROOT_PAGETABLE_SHIFT L2_PAGETABLE_SHIFT
-
-#define PAGETABLE_ORDER 10
-#define L1_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
-#define L2_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
-#define ROOT_PAGETABLE_ENTRIES L2_PAGETABLE_ENTRIES
-
-#define PADDR_BITS 32
-#define PADDR_MASK (~0UL)
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-
-/* read access (should only be used for debug printk's) */
-typedef u32 intpte_t;
-#define PRIpte "08x"
-
-typedef struct { intpte_t l1; } l1_pgentry_t;
-typedef struct { intpte_t l2; } l2_pgentry_t;
-typedef l2_pgentry_t root_pgentry_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#define pte_read_atomic(ptep) (*(ptep))
-#define pte_write_atomic(ptep, pte) (*(ptep) = (pte))
-#define pte_write(ptep, pte) (*(ptep) = (pte))
-
-/* root table */
-#define root_get_pfn l2e_get_pfn
-#define root_get_flags l2e_get_flags
-#define root_get_intpte l2e_get_intpte
-#define root_empty l2e_empty
-#define root_from_paddr l2e_from_paddr
-#define PGT_root_page_table PGT_l2_page_table
-
-/* misc */
-#define is_guest_l1_slot(_s) (1)
-#define is_guest_l2_slot(_d, _t,_s) ((_s) < L2_PAGETABLE_FIRST_XEN_SLOT)
-
-/*
- * PTE pfn and flags:
- * 20-bit pfn = (pte[31:12])
- * 12-bit flags = (pte[11:0])
- */
-
-#define _PAGE_NX_BIT 0U
-#define _PAGE_NX 0U
-
-/* Extract flags into 12-bit integer, or turn 12-bit flags into a pte mask. */
-#define get_pte_flags(x) ((int)(x) & 0xFFF)
-#define put_pte_flags(x) ((intpte_t)((x) & 0xFFF))
-
-#endif /* __X86_32_PAGE_2LEVEL_H__ */
+++ /dev/null
-#ifndef __X86_32_PAGE_3LEVEL_H__
-#define __X86_32_PAGE_3LEVEL_H__
-
-#define L1_PAGETABLE_SHIFT 12
-#define L2_PAGETABLE_SHIFT 21
-#define L3_PAGETABLE_SHIFT 30
-#define PAGE_SHIFT L1_PAGETABLE_SHIFT
-#define ROOT_PAGETABLE_SHIFT L3_PAGETABLE_SHIFT
-
-#define PAGETABLE_ORDER 9
-#define L1_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
-#define L2_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
-#define L3_PAGETABLE_ENTRIES 4
-#define ROOT_PAGETABLE_ENTRIES L3_PAGETABLE_ENTRIES
-
-/*
- * Architecturally, physical addresses may be up to 52 bits. However, the
- * page-frame number (pfn) of a 52-bit address will not fit into a 32-bit
- * word. Instead we treat bits 44-51 of a pte as flag bits which are never
- * allowed to be set by a guest kernel. This 'limits' us to addressing 16TB
- * of physical memory on a 32-bit PAE system.
- */
-#define PADDR_BITS 44
-#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
-
-#ifndef __ASSEMBLY__
-
-#include <asm/types.h>
-
-/* read access (should only be used for debug printk's) */
-typedef u64 intpte_t;
-#define PRIpte "016llx"
-
-typedef struct { intpte_t l1; } l1_pgentry_t;
-typedef struct { intpte_t l2; } l2_pgentry_t;
-typedef struct { intpte_t l3; } l3_pgentry_t;
-typedef l3_pgentry_t root_pgentry_t;
-
-#endif /* !__ASSEMBLY__ */
-
-#define pte_read_atomic(ptep) ({ \
- intpte_t __pte = *(ptep), __npte; \
- while ( (__npte = cmpxchg(ptep, __pte, __pte)) != __pte ) \
- __pte = __npte; \
- __pte; })
-#define pte_write_atomic(ptep, pte) do { \
- intpte_t __pte = *(ptep), __npte; \
- while ( (__npte = cmpxchg(ptep, __pte, (pte))) != __pte ) \
- __pte = __npte; \
-} while ( 0 )
-#define pte_write(ptep, pte) do { \
- u32 *__ptep_words = (u32 *)(ptep); \
- __ptep_words[0] = 0; \
- wmb(); \
- __ptep_words[1] = (pte) >> 32; \
- wmb(); \
- __ptep_words[0] = (pte) >> 0; \
-} while ( 0 )
-
-/* root table */
-#define root_get_pfn l3e_get_pfn
-#define root_get_flags l3e_get_flags
-#define root_get_intpte l3e_get_intpte
-#define root_empty l3e_empty
-#define root_from_paddr l3e_from_paddr
-#define PGT_root_page_table PGT_l3_page_table
-
-/* misc */
-#define is_guest_l1_slot(s) (1)
-#define is_guest_l2_slot(d,t,s) \
- ( !((t) & PGT_pae_xen_l2) || \
- ((s) < (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES - 1))) )
-#define is_guest_l3_slot(s) (1)
-
-/*
- * PTE pfn and flags:
- * 32-bit pfn = (pte[43:12])
- * 32-bit flags = (pte[63:44],pte[11:0])
- */
-
-#define _PAGE_NX_BIT (1U<<31)
-#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
-
-/* Extract flags into 32-bit integer, or turn 32-bit flags into a pte mask. */
-#define get_pte_flags(x) (((int)((x) >> 32) & ~0xFFF) | ((int)(x) & 0xFFF))
-#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 32) | ((x) & 0xFFF))
-
-#define L3_DISALLOW_MASK 0xFFFFF1FEU /* must-be-zero */
-
-#endif /* __X86_32_PAGE_3LEVEL_H__ */
#ifndef __X86_32_PAGE_H__
#define __X86_32_PAGE_H__
+#define L1_PAGETABLE_SHIFT 12
+#define L2_PAGETABLE_SHIFT 21
+#define L3_PAGETABLE_SHIFT 30
+#define PAGE_SHIFT L1_PAGETABLE_SHIFT
+#define ROOT_PAGETABLE_SHIFT L3_PAGETABLE_SHIFT
+
+#define PAGETABLE_ORDER 9
+#define L1_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
+#define L2_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
+#define L3_PAGETABLE_ENTRIES 4
+#define ROOT_PAGETABLE_ENTRIES L3_PAGETABLE_ENTRIES
+
+/*
+ * Architecturally, physical addresses may be up to 52 bits. However, the
+ * page-frame number (pfn) of a 52-bit address will not fit into a 32-bit
+ * word. Instead we treat bits 44-51 of a pte as flag bits which are never
+ * allowed to be set by a guest kernel. This 'limits' us to addressing 16TB
+ * of physical memory on a 32-bit PAE system.
+ */
+#define PADDR_BITS 44
+#define PADDR_MASK ((1ULL << PADDR_BITS)-1)
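+/* A 32-bit pfn of 4kB pages gives 2^32 * 2^12 = 2^44 bytes, hence the 16TB
+ * limit and PADDR_BITS == 44. */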
+
#define __PAGE_OFFSET (0xFF000000)
#define __XEN_VIRT_START __PAGE_OFFSET
#define is_canonical_address(x) 1
-#include <xen/config.h>
-#ifdef CONFIG_X86_PAE
-# include <asm/x86_32/page-3level.h>
-#else
-# include <asm/x86_32/page-2level.h>
-#endif
-
/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) ((_a) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) ((_a) >> L2_PAGETABLE_SHIFT)
#ifndef __ASSEMBLY__
+
+#include <xen/config.h>
+#include <asm/types.h>
+
+/* read access (should only be used for debug printk's) */
+typedef u64 intpte_t;
+#define PRIpte "016llx"
+
+typedef struct { intpte_t l1; } l1_pgentry_t;
+typedef struct { intpte_t l2; } l2_pgentry_t;
+typedef struct { intpte_t l3; } l3_pgentry_t;
+typedef l3_pgentry_t root_pgentry_t;
+
extern unsigned int PAGE_HYPERVISOR;
extern unsigned int PAGE_HYPERVISOR_NOCACHE;
+
#endif
+#define pte_read_atomic(ptep) ({ \
+ intpte_t __pte = *(ptep), __npte; \
+ while ( (__npte = cmpxchg(ptep, __pte, __pte)) != __pte ) \
+ __pte = __npte; \
+ __pte; })
+#define pte_write_atomic(ptep, pte) do { \
+ intpte_t __pte = *(ptep), __npte; \
+ while ( (__npte = cmpxchg(ptep, __pte, (pte))) != __pte ) \
+ __pte = __npte; \
+} while ( 0 )
+#define pte_write(ptep, pte) do { \
+ u32 *__ptep_words = (u32 *)(ptep); \
+ __ptep_words[0] = 0; \
+ wmb(); \
+ __ptep_words[1] = (pte) >> 32; \
+ wmb(); \
+ __ptep_words[0] = (pte) >> 0; \
+} while ( 0 )
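+/* pte_write() ordering matters: zeroing the low word first clears
+ * _PAGE_PRESENT, so a concurrent walker never observes a half-updated 64-bit
+ * PTE; the high word is then written before the final low word, with wmb()
+ * between the stores. */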
+
+/* root table */
+#define root_get_pfn l3e_get_pfn
+#define root_get_flags l3e_get_flags
+#define root_get_intpte l3e_get_intpte
+#define root_empty l3e_empty
+#define root_from_paddr l3e_from_paddr
+#define PGT_root_page_table PGT_l3_page_table
+
+/* misc */
+#define is_guest_l1_slot(s) (1)
+#define is_guest_l2_slot(d,t,s) \
+ ( !((t) & PGT_pae_xen_l2) || \
+ ((s) < (L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES - 1))) )
+#define is_guest_l3_slot(s) (1)
+
+/*
+ * PTE pfn and flags:
+ * 32-bit pfn = (pte[43:12])
+ * 32-bit flags = (pte[63:44],pte[11:0])
+ */
+
+#define _PAGE_NX_BIT (1U<<31)
+#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0)
+
+/* Extract flags into 32-bit integer, or turn 32-bit flags into a pte mask. */
+#define get_pte_flags(x) (((int)((x) >> 32) & ~0xFFF) | ((int)(x) & 0xFFF))
+#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 32) | ((x) & 0xFFF))
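+/* Example: the NX bit is pte bit 63; get_pte_flags() folds it into bit 31 of
+ * the 32-bit flags word, which is why _PAGE_NX_BIT is (1U<<31) above. */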
+
#define GRANT_PTE_FLAGS \
(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_GNTTAB)
#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK)
+#define L3_DISALLOW_MASK 0xFFFFF1FEU /* must-be-zero */
#endif /* __X86_32_PAGE_H__ */
#define MACH2PHYS_VIRT_END_PAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
+/* Non-PAE bounds are obsolete. */
#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
#define MACH2PHYS_VIRT_END_NONPAE \
mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
-#ifdef CONFIG_X86_PAE
#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
-#else
-#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_NONPAE
-#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_NONPAE
-#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_NONPAE
-#endif
#ifndef HYPERVISOR_VIRT_START
#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)